From: Jan Beulich
Date: Fri, 29 Aug 2014 10:22:42 +0000 (+0200)
Subject: EPT: utilize GLA->GPA translation known for certain faults
X-Git-Tag: archive/raspbian/4.8.0-1+rpi1~1^2~4496
X-Git-Url: https://dgit.raspbian.org/?a=commitdiff_plain;h=ecb69533582e51999e5d76bce513be870222908f;p=xen.git

EPT: utilize GLA->GPA translation known for certain faults

Rather than doing the translation ourselves in __hvmemul_{read,write}(),
leverage that we already know the association for faults other than those
that occurred while translating addresses of page tables.

There is one intentional but not necessarily obvious (and possibly
subtle) adjustment to behavior: __hvmemul_read() no longer blindly bails
on instruction fetches matching the MMIO GVA (the callers of
handle_mmio_with_translation() now control the behavior via the struct
npfec they pass, and it didn't seem right to bail here rather than just
falling through to the unaccelerated path).

Signed-off-by: Jan Beulich
Reviewed-by: Tim Deegan
---

diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index eac159f434..86cf43273a 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -481,10 +481,11 @@ static int __hvmemul_read(
         while ( off & (chunk - 1) )
             chunk >>= 1;
 
-    if ( unlikely(vio->mmio_gva == (addr & PAGE_MASK)) && vio->mmio_gva )
+    if ( ((access_type != hvm_access_insn_fetch
+           ? vio->mmio_access.read_access
+           : vio->mmio_access.insn_fetch)) &&
+         (vio->mmio_gva == (addr & PAGE_MASK)) )
     {
-        if ( access_type == hvm_access_insn_fetch )
-            return X86EMUL_UNHANDLEABLE;
         gpa = (((paddr_t)vio->mmio_gpfn << PAGE_SHIFT) | off);
         while ( (off + chunk) <= PAGE_SIZE )
         {
@@ -624,7 +625,8 @@ static int hvmemul_write(
         while ( off & (chunk - 1) )
             chunk >>= 1;
 
-    if ( unlikely(vio->mmio_gva == (addr & PAGE_MASK)) && vio->mmio_gva )
+    if ( vio->mmio_access.write_access &&
+         (vio->mmio_gva == (addr & PAGE_MASK)) )
     {
         gpa = (((paddr_t)vio->mmio_gpfn << PAGE_SHIFT) | off);
         while ( (off + chunk) <= PAGE_SIZE )
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 0363714386..83e6faefb1 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -2788,7 +2788,7 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
          && is_hvm_vcpu(v)
          && hvm_mmio_internal(gpa) )
     {
-        if ( !handle_mmio() )
+        if ( !handle_mmio_with_translation(gla, gpa >> PAGE_SHIFT, npfec) )
             hvm_inject_hw_exception(TRAP_gp_fault, 0);
         rc = 1;
         goto out;
@@ -2862,7 +2862,7 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
         if ( unlikely(is_pvh_vcpu(v)) )
             goto out;
 
-        if ( !handle_mmio() )
+        if ( !handle_mmio_with_translation(gla, gpa >> PAGE_SHIFT, npfec) )
             hvm_inject_hw_exception(TRAP_gp_fault, 0);
         rc = 1;
         goto out;
diff --git a/xen/arch/x86/hvm/io.c b/xen/arch/x86/hvm/io.c
index b2b7b2736c..9f565d612b 100644
--- a/xen/arch/x86/hvm/io.c
+++ b/xen/arch/x86/hvm/io.c
@@ -95,7 +95,7 @@ int handle_mmio(void)
     if ( vio->io_state == HVMIO_awaiting_completion )
         vio->io_state = HVMIO_handle_mmio_awaiting_completion;
     else
-        vio->mmio_gva = 0;
+        vio->mmio_access = (struct npfec){};
 
     switch ( rc )
     {
@@ -124,9 +124,14 @@ int handle_mmio(void)
     return 1;
 }
 
-int handle_mmio_with_translation(unsigned long gva, unsigned long gpfn)
+int handle_mmio_with_translation(unsigned long gva, unsigned long gpfn,
+                                 struct npfec access)
 {
     struct hvm_vcpu_io *vio = &current->arch.hvm_vcpu.hvm_io;
+
+    vio->mmio_access = access.gla_valid &&
+                       access.kind == npfec_kind_with_gla
+                       ? access : (struct npfec){};
     vio->mmio_gva = gva & PAGE_MASK;
     vio->mmio_gpfn = gpfn;
     return handle_mmio();
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index c6c9d10ee2..225290e765 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -2824,6 +2824,11 @@ static int sh_page_fault(struct vcpu *v,
     p2m_type_t p2mt;
     uint32_t rc;
     int version;
+    struct npfec access = {
+         .read_access = 1,
+         .gla_valid = 1,
+         .kind = npfec_kind_with_gla
+    };
 #if SHADOW_OPTIMIZATIONS & SHOPT_FAST_EMULATION
     int fast_emul = 0;
 #endif
@@ -2834,6 +2839,9 @@ static int sh_page_fault(struct vcpu *v,
 
     perfc_incr(shadow_fault);
 
+    if ( regs->error_code & PFEC_write_access )
+        access.write_access = 1;
+
 #if SHADOW_OPTIMIZATIONS & SHOPT_FAST_EMULATION
     /* If faulting frame is successfully emulated in last shadow fault
      * it's highly likely to reach same emulation action for this frame.
@@ -2935,7 +2943,7 @@ static int sh_page_fault(struct vcpu *v,
             SHADOW_PRINTK("fast path mmio %#"PRIpaddr"\n", gpa);
             reset_early_unshadow(v);
             trace_shadow_gen(TRC_SHADOW_FAST_MMIO, va);
-            return (handle_mmio_with_translation(va, gpa >> PAGE_SHIFT)
+            return (handle_mmio_with_translation(va, gpa >> PAGE_SHIFT, access)
                     ? EXCRET_fault_fixed : 0);
         }
         else
@@ -3424,7 +3432,7 @@ static int sh_page_fault(struct vcpu *v,
     paging_unlock(d);
     put_gfn(d, gfn_x(gfn));
     trace_shadow_gen(TRC_SHADOW_MMIO, va);
-    return (handle_mmio_with_translation(va, gpa >> PAGE_SHIFT)
+    return (handle_mmio_with_translation(va, gpa >> PAGE_SHIFT, access)
             ? EXCRET_fault_fixed : 0);
 
 not_a_shadow_fault:
diff --git a/xen/include/asm-x86/hvm/io.h b/xen/include/asm-x86/hvm/io.h
index c7ac566460..886a9d616f 100644
--- a/xen/include/asm-x86/hvm/io.h
+++ b/xen/include/asm-x86/hvm/io.h
@@ -119,7 +119,8 @@ static inline void register_buffered_io_handler(
 void send_timeoffset_req(unsigned long timeoff);
 void send_invalidate_req(void);
 int handle_mmio(void);
-int handle_mmio_with_translation(unsigned long gva, unsigned long gpfn);
+int handle_mmio_with_translation(unsigned long gva, unsigned long gpfn,
+                                 struct npfec);
 int handle_pio(uint16_t port, unsigned int size, int dir);
 void hvm_interrupt_post(struct vcpu *v, int vector, int type);
 void hvm_io_assist(ioreq_t *p);
diff --git a/xen/include/asm-x86/hvm/vcpu.h b/xen/include/asm-x86/hvm/vcpu.h
index db37232395..01e0665beb 100644
--- a/xen/include/asm-x86/hvm/vcpu.h
+++ b/xen/include/asm-x86/hvm/vcpu.h
@@ -54,8 +54,9 @@ struct hvm_vcpu_io {
      * HVM emulation:
      *  Virtual address @mmio_gva maps to MMIO physical frame @mmio_gpfn.
      *  The latter is known to be an MMIO frame (not RAM).
-     *  This translation is only valid if @mmio_gva is non-zero.
+     *  This translation is only valid for accesses as per @mmio_access.
      */
+    struct npfec mmio_access;
    unsigned long mmio_gva;
    unsigned long mmio_gpfn;
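
To make the gating introduced above easier to follow, here is a minimal,
compilable model of it. All names in this sketch (access_flags, vcpu_io,
fault_kind, record_translation(), may_use_cached_translation()) are
simplified stand-ins for Xen's struct npfec and struct hvm_vcpu_io; this
illustrates the idea under those assumptions, it is not the hypervisor
code itself. The point being modeled: the GVA->GPA association is recorded
only for faults that carried a valid guest-linear address on the final
translation (not on a page-table walk), and is reused only for a matching
kind of access.

/*
 * Minimal, self-contained model of the translation caching/gating this
 * patch adds.  All types and names are simplified stand-ins for Xen's
 * struct npfec / struct hvm_vcpu_io, not the real hypervisor code.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_MASK  (~(((uint64_t)1 << PAGE_SHIFT) - 1))

enum access_type { ACC_READ, ACC_WRITE, ACC_INSN_FETCH };
enum fault_kind  { KIND_UNKNOWN, KIND_WITH_GLA, KIND_IN_GPT };

/* Cut-down analogue of struct npfec. */
struct access_flags {
    bool read_access;
    bool write_access;
    bool insn_fetch;
    bool gla_valid;       /* hardware reported a valid linear address */
    enum fault_kind kind; /* fault on final translation, or on a PT walk */
};

/* Cut-down analogue of struct hvm_vcpu_io. */
struct vcpu_io {
    struct access_flags mmio_access;
    uint64_t mmio_gva;
    uint64_t mmio_gpfn;
};

/*
 * Mirrors handle_mmio_with_translation(): record the access flags only
 * when the fault carried a usable GVA, i.e. not when it occurred while
 * the hardware was translating page-table addresses.
 */
static void record_translation(struct vcpu_io *vio, uint64_t gva,
                               uint64_t gpfn, struct access_flags access)
{
    vio->mmio_access = (access.gla_valid && access.kind == KIND_WITH_GLA)
                       ? access : (struct access_flags){ 0 };
    vio->mmio_gva = gva & PAGE_MASK;
    vio->mmio_gpfn = gpfn;
}

/*
 * Mirrors the new checks in __hvmemul_read()/hvmemul_write(): the cached
 * GVA->GPA association is reused only when the recorded fault matches
 * the kind of access currently being emulated.
 */
static bool may_use_cached_translation(const struct vcpu_io *vio,
                                       uint64_t addr, enum access_type type)
{
    bool ok = false;

    switch ( type )
    {
    case ACC_READ:       ok = vio->mmio_access.read_access;  break;
    case ACC_WRITE:      ok = vio->mmio_access.write_access; break;
    case ACC_INSN_FETCH: ok = vio->mmio_access.insn_fetch;   break;
    }

    return ok && vio->mmio_gva == (addr & PAGE_MASK);
}

int main(void)
{
    struct vcpu_io vio = { 0 };
    uint64_t gva = 0xffff82c000000123ULL; /* arbitrary example address */

    /* A read fault on the final translation establishes the association. */
    record_translation(&vio, gva, 0xfee00ULL,
                       (struct access_flags){ .read_access = true,
                                              .gla_valid = true,
                                              .kind = KIND_WITH_GLA });

    /* Read on the same page reuses the cache; an instruction fetch now
     * simply falls back to a full walk instead of being rejected. */
    printf("read:  %s\n", may_use_cached_translation(&vio, gva, ACC_READ)
                          ? "cached" : "full walk");
    printf("fetch: %s\n", may_use_cached_translation(&vio, gva, ACC_INSN_FETCH)
                          ? "cached" : "full walk");
    return 0;
}

Note how this captures the behavioral change called out in the commit
message: validity is now expressed through the recorded access flags
rather than through @mmio_gva being non-zero, so a non-matching access
(such as the instruction fetch above) just fails the check and falls
through to the unaccelerated translation path.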